irqreturn_t evtchn_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
- u32 l1, l2;
+ unsigned long l1, l2;
unsigned int l1i, l2i, port;
irqreturn_t (*handler)(int, void *, struct pt_regs *);
shared_info_t *s = HYPERVISOR_shared_info;
while ( l1 != 0 )
{
l1i = __ffs(l1);
- l1 &= ~(1 << l1i);
+ l1 &= ~(1UL << l1i);
while ( (l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i]) != 0 )
{
l2i = __ffs(l2);
- l2 &= ~(1 << l2i);
+ l2 &= ~(1UL << l2i);
- port = (l1i << 5) + l2i;
+ port = (l1i * BITS_PER_LONG) + l2i;
if ( (handler = evtchns[port].handler) != NULL )
{
clear_evtchn(port);
* If SMP should be disabled, then really disable it!
*/
if (!max_cpus) {
- HYPERVISOR_shared_info->n_vcpu = 1;
+ xen_start_info->n_vcpu = 1;
printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
smpboot_clear_io_apic_irqs();
#if 0
*/
Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
#endif
- Dprintk("CPU present map: %lx\n",
- (1UL << HYPERVISOR_shared_info->n_vcpu) - 1);
+ Dprintk("CPU present map: %lx\n", (1UL << xen_start_info->n_vcpu) - 1);
kicked = 1;
- for (cpu = 1; kicked < NR_CPUS &&
- cpu < HYPERVISOR_shared_info->n_vcpu; cpu++) {
+ for (cpu = 1; kicked < NR_CPUS && cpu < xen_start_info->n_vcpu; cpu++) {
if (max_cpus <= cpucount+1)
continue;
#ifdef CONFIG_SMP
-static u8 cpu_evtchn[NR_EVENT_CHANNELS];
-static u32 cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/32];
+static u8 cpu_evtchn[NR_EVENT_CHANNELS];
+static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
#define active_evtchns(cpu,sh,idx) \
((sh)->evtchn_pending[idx] & \
/* NB. Interrupts are disabled on entry. */
asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
{
- u32 l1, l2;
+ unsigned long l1, l2;
unsigned int l1i, l2i, port;
int irq, cpu = smp_processor_id();
shared_info_t *s = HYPERVISOR_shared_info;
l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
while (l1 != 0) {
l1i = __ffs(l1);
- l1 &= ~(1 << l1i);
+ l1 &= ~(1UL << l1i);
while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
l2i = __ffs(l2);
- l2 &= ~(1 << l2i);
+ l2 &= ~(1UL << l2i);
- port = (l1i << 5) + l2i;
+ port = (l1i * BITS_PER_LONG) + l2i;
if ((irq = evtchn_to_irq[port]) != -1)
do_IRQ(irq, regs);
else
*/
if (!max_cpus) {
#ifdef CONFIG_XEN
- HYPERVISOR_shared_info->n_vcpu = 1;
+ xen_start_info->n_vcpu = 1;
#endif
printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
#ifndef CONFIG_XEN
int apicid = cpu_present_to_apicid(i);
if (physid_isset(apicid, phys_cpu_present_map)) {
#else
- if (i < HYPERVISOR_shared_info->n_vcpu) {
+ if (i < xen_start_info->n_vcpu) {
#endif
cpu_set(i, cpu_present_map);
/* possible map would be different if we supported real
}
-#define smp_found_config (HYPERVISOR_shared_info->n_vcpu > 1)
+#define smp_found_config (xen_start_info->n_vcpu > 1)
}
-#define smp_found_config (HYPERVISOR_shared_info->n_vcpu > 1)
+#define smp_found_config (xen_start_info->n_vcpu > 1)
* like a real IO-APIC we 'lose the interrupt edge' if the channel is
* masked.
*/
- if (synch_test_bit (port, &s->evtchn_pending[0]) &&
- !synch_test_and_set_bit(port>>5, &vcpu_info->evtchn_pending_sel)) {
+ if (synch_test_bit(port, &s->evtchn_pending[0]) &&
+ !synch_test_and_set_bit(port / BITS_PER_LONG,
+ &vcpu_info->evtchn_pending_sel)) {
vcpu_info->evtchn_upcall_pending = 1;
if (!vcpu_info->evtchn_upcall_mask)
force_evtchn_callback();
start_info->store_evtchn = store_evtchn;
start_info->console_mfn = *console_mfn;
start_info->console_evtchn = console_evtchn;
+ start_info->n_vcpu = vcpus;
if ( initrd_len != 0 )
{
start_info->mod_start = vinitrd_start;
for ( i = 0; i < MAX_VIRT_CPUS; i++ )
shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
- shared_info->n_vcpu = vcpus;
- printf(" VCPUS: %d\n", shared_info->n_vcpu);
-
munmap(shared_info, PAGE_SIZE);
/* Send the page update requests down to the hypervisor. */
for ( i = 0; i < MAX_VIRT_CPUS; i++ )
shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
- shared_info->n_vcpu = vcpus;
- printf(" VCPUS: %d\n", shared_info->n_vcpu);
-
munmap(shared_info, PAGE_SIZE);
/* Populate the event channel port in the shared page */
defaultInfo('on_crash', lambda: "restart")
defaultInfo('cpu', lambda: None)
defaultInfo('cpu_weight', lambda: 1.0)
- defaultInfo('vcpus', lambda: 1)
+ defaultInfo('vcpus', lambda: int(1))
defaultInfo('vcpu_avail', lambda: (1 << self.info['vcpus']) - 1)
defaultInfo('bootloader', lambda: None)
defaultInfo('backend', lambda: [])
*/
if (test_and_clear_bit(port,
&d->shared_info->evtchn_pending[0])) {
- clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
+ clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
vmx_io_assist(v);
}
* nothing lost. Next loop will check I/O channel to fix this
* window.
*/
- clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
+ clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
}
else
break;
/* Clear indicator specific to interrupt delivered from DM */
if (test_and_clear_bit(port,
&d->shared_info->evtchn_pending[0])) {
- if (!d->shared_info->evtchn_pending[port >> 5])
- clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
+ if (!d->shared_info->evtchn_pending[port/BITS_PER_LONG])
+ clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
if (!v->vcpu_info->evtchn_pending_sel)
clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
/* Mask all upcalls... */
for ( i = 0; i < MAX_VIRT_CPUS; i++ )
d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1;
- d->shared_info->n_vcpu = num_online_cpus();
- for ( i = 1; i < d->shared_info->n_vcpu; i++ )
- (void)alloc_vcpu(d, i, i % num_online_cpus());
+ for ( i = 1; i < num_online_cpus(); i++ )
+ (void)alloc_vcpu(d, i, i);
/* Set up monitor table */
update_pagetables(v);
/* Set up start info area. */
si = (start_info_t *)vstartinfo_start;
memset(si, 0, PAGE_SIZE);
- si->nr_pages = nr_pages;
+ si->nr_pages = nr_pages;
+ si->n_vcpu = num_online_cpus();
if ( opt_dom0_translate )
{
struct domain *d = v->domain;
int port = iopacket_port(d);
- /* evtchn_pending is shared by other event channels in 0-31 range */
- if (!d->shared_info->evtchn_pending[port>>5])
- clear_bit(port>>5, &v->vcpu_info->evtchn_pending_sel);
+ /* evtchn_pending_sel bit is shared by other event channels. */
+ if (!d->shared_info->evtchn_pending[port/BITS_PER_LONG])
+ clear_bit(port/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel);
- /* Note: VMX domains may need upcalls as well */
+ /* Note: VMX domains may need upcalls as well. */
if (!v->vcpu_info->evtchn_pending_sel)
clear_bit(0, &v->vcpu_info->evtchn_upcall_pending);
- /* clear the pending bit for port */
+ /* Clear the pending bit for port. */
return test_and_clear_bit(port, &d->shared_info->evtchn_pending[0]);
}
break;
/* Events other than IOPACKET_PORT might have woken us up. In that
case, safely go back to sleep. */
- clear_bit(port>>5, &current->vcpu_info->evtchn_pending_sel);
+ clear_bit(port/BITS_PER_LONG, &current->vcpu_info->evtchn_pending_sel);
clear_bit(0, &current->vcpu_info->evtchn_upcall_pending);
} while(1);
}
&d->shared_info->evtchn_pending[0]),
test_bit(v->virq_to_evtchn[VIRQ_DEBUG],
&d->shared_info->evtchn_mask[0]),
- test_bit(v->virq_to_evtchn[VIRQ_DEBUG]>>5,
+ test_bit(v->virq_to_evtchn[VIRQ_DEBUG]/BITS_PER_LONG,
&v->vcpu_info->evtchn_pending_sel));
send_guest_virq(v, VIRQ_DEBUG);
}
unsigned long args[6];
} multicall_entry_t;
-/* Event channel endpoints per domain. */
-#define NR_EVENT_CHANNELS 1024
+/*
+ * Event channel endpoints per domain:
+ * 1024 if a long is 32 bits; 4096 if a long is 64 bits.
+ */
+#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)
/*
* Per-VCPU information goes here. This will be cleaned up more when Xen
*/
uint8_t evtchn_upcall_pending;
uint8_t evtchn_upcall_mask;
- uint32_t evtchn_pending_sel;
+ unsigned long evtchn_pending_sel;
#ifdef __ARCH_HAS_VCPU_INFO
arch_vcpu_info_t arch;
#endif
vcpu_time_info_t vcpu_time[MAX_VIRT_CPUS];
- uint32_t n_vcpu;
-
/*
- * A domain can have up to 1024 "event channels" on which it can send
- * and receive asynchronous event notifications. There are three classes
- * of event that are delivered by this mechanism:
+ * A domain can create "event channels" on which it can send and receive
+ * asynchronous event notifications. There are three classes of event that
+ * are delivered by this mechanism:
* 1. Bi-directional inter- and intra-domain connections. Domains must
- * arrange out-of-band to set up a connection (usually the setup
- * is initiated and organised by a privileged third party such as
- * software running in domain 0).
+ * arrange out-of-band to set up a connection (usually by allocating
+ * an unbound 'listener' port and advertising that via a storage service
+ * such as xenstore).
* 2. Physical interrupts. A domain with suitable hardware-access
* privileges can bind an event-channel port to a physical interrupt
* source.
* port to a virtual interrupt source, such as the virtual-timer
* device or the emergency console.
*
- * Event channels are addressed by a "port index" between 0 and 1023.
- * Each channel is associated with two bits of information:
+ * Event channels are addressed by a "port index". Each channel is
+ * associated with two bits of information:
* 1. PENDING -- notifies the domain that there is a pending notification
* to be processed. This bit is cleared by the guest.
* 2. MASK -- if this bit is clear then a 0->1 transition of PENDING
*
* To expedite scanning of pending notifications, any 0->1 pending
* transition on an unmasked channel causes a corresponding bit in a
- * 32-bit selector to be set. Each bit in the selector covers a 32-bit
- * word in the PENDING bitfield array.
+ * per-vcpu selector word to be set. Each bit in the selector covers a
+ * 'C long' in the PENDING bitfield array.
*/
- uint32_t evtchn_pending[32];
- uint32_t evtchn_mask[32];
+ unsigned long evtchn_pending[sizeof(unsigned long) * 8];
+ unsigned long evtchn_mask[sizeof(unsigned long) * 8];
/*
* Wallclock time: updated only by control software. Guests should base
unsigned long mfn_list; /* VIRTUAL address of page-frame list. */
unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
+ uint32_t n_vcpu;
int8_t cmd_line[MAX_GUEST_CMDLINE];
} start_info_t;
shared_info_t *s = d->shared_info;
/* These four operations must happen in strict order. */
- if ( !test_and_set_bit(port, &s->evtchn_pending[0]) &&
- !test_bit (port, &s->evtchn_mask[0]) &&
- !test_and_set_bit(port>>5, &v->vcpu_info->evtchn_pending_sel) &&
- !test_and_set_bit(0, &v->vcpu_info->evtchn_upcall_pending) )
+ if ( !test_and_set_bit(port, &s->evtchn_pending[0]) &&
+ !test_bit (port, &s->evtchn_mask[0]) &&
+ !test_and_set_bit(port / BITS_PER_LONG,
+ &v->vcpu_info->evtchn_pending_sel) &&
+ !test_and_set_bit(0, &v->vcpu_info->evtchn_upcall_pending) )
{
evtchn_notify(v);
}
/* A global pointer to the initial domain (DOM0). */
extern struct domain *dom0;
-#define MAX_EVTCHNS 1024
+#define MAX_EVTCHNS NR_EVENT_CHANNELS
#define EVTCHNS_PER_BUCKET 128
#define NR_EVTCHN_BUCKETS (MAX_EVTCHNS / EVTCHNS_PER_BUCKET)